}
#if P2M_AUDIT
-static void audit_p2m(struct p2m_domain *p2m);
+static void audit_p2m(struct p2m_domain *p2m, int strict_m2p);
#else
-# define audit_p2m(_p2m) do { (void)(_p2m); } while(0)
+# define audit_p2m(_p2m, _m2p) do { (void)(_p2m),(_m2p); } while (0)
#endif /* P2M_AUDIT */
// Find the next level's P2M entry, checking for out-of-range gfn's...
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
p2m_lock(p2m);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
if ( unlikely(d->is_dying) )
goto out_unlock;
}
out_unlock:
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
out:
*/
set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
p2m_populate_on_demand);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
return 0;
}
if ( do_locking )
p2m_lock(p2m);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
/* Check to make sure this is still PoD */
if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
r = p2m_pod_demand_populate(p2m, gfn, order, q);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
if ( do_locking )
p2m_unlock(p2m);
}
#if P2M_AUDIT
-static void audit_p2m(struct p2m_domain *p2m)
+/* strict_m2p == 0 allows m2p mappings that don't match the p2m.
+ * It's intended for add_to_physmap, when the domain has just been allocated
+ * new mfns that might have stale m2p entries from previous owners */
+static void audit_p2m(struct p2m_domain *p2m, int strict_m2p)
{
struct page_info *page;
struct domain *od;
continue;
}
- if ( gfn == 0x55555555 )
+ if ( gfn == 0x55555555 || gfn == 0x5555555555555555 )
{
orphans_d++;
//P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
}
p2mfn = gfn_to_mfn_type_p2m(p2m, gfn, &type, p2m_query);
- if ( mfn_x(p2mfn) != mfn )
+ if ( strict_m2p && mfn_x(p2mfn) != mfn )
{
mpbad++;
P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
// P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
// orphans_i + orphans_d, orphans_i, orphans_d,
if ( mpbad | pmbad )
+ {
P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
pmbad, mpbad);
+ WARN();
+ }
}
#endif /* P2M_AUDIT */
unsigned long mfn, unsigned int page_order)
{
p2m_lock(p2m);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_remove_page(p2m, gfn, mfn, page_order);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
}
return rc;
p2m_lock(p2m);
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
BUG_ON(p2m->pod.entry_count < 0);
}
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
out:
return rc;
p2m_lock(p2m);
- audit_p2m(p2m);
+ audit_p2m(p2m, 0);
P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
}
}
- audit_p2m(p2m);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
return rc;
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
p2m_lock(p2m);
rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
}
p2m_lock(p2m);
rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
return rc;
/* Fix p2m entry */
p2m_lock(p2m);
set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
ret = 0;
/* Remove mapping from p2m table */
p2m_lock(p2m);
set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
/* Put the page back so it gets freed */
{
p2m_lock(p2m);
set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
}
/* Fix p2m mapping */
p2m_lock(p2m);
set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
return 0;
mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
p2m_lock(p2m);
set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+ audit_p2m(p2m, 1);
p2m_unlock(p2m);
/* Unpause domain */